author      Wollnashorn <Wollnashorn@users.noreply.github.com>  2023-04-05 01:29:46 +0200
committer   Wollnashorn <Wollnashorn@users.noreply.github.com>  2023-04-08 16:12:30 +0200
commit      780240e6979b198e7bd10feaad5399b8b4b63762 (patch)
tree        c145f48c66cf003e618cefc63496e3f5b7637f56
parent      Merge pull request #10024 from german77/crysis (diff)
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_glsl_image.cpp    29
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_image.cpp  39
-rw-r--r--  src/shader_recompiler/profile.h                             4
-rw-r--r--  src/video_core/renderer_opengl/gl_device.cpp                1
-rw-r--r--  src/video_core/renderer_opengl/gl_device.h                  5
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.cpp          1
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.cpp        1
-rw-r--r--  src/video_core/vulkan_common/vulkan_device.cpp              1
-rw-r--r--  src/video_core/vulkan_common/vulkan_device.h                5
9 files changed, 86 insertions(+), 0 deletions(-)
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
index f335c8af0..418505475 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
@@ -143,6 +143,21 @@ IR::Inst* PrepareSparse(IR::Inst& inst) {
}
return sparse_inst;
}
+
+std::string ImageGatherSubpixelOffset(const IR::TextureInstInfo& info, std::string_view texture,
+ std::string_view coords) {
+ switch (info.type) {
+ case TextureType::Color2D:
+ case TextureType::Color2DRect:
+ return fmt::format("{}+vec2(0.001953125)/vec2(textureSize({}, 0))", coords, texture);
+ case TextureType::ColorArray2D:
+ case TextureType::ColorCube:
+ return fmt::format("vec3({0}.xy+vec2(0.001953125)/vec2(textureSize({1}, 0)),{0}.z)", coords,
+ texture);
+ default:
+ return std::string{coords};
+ }
+}
} // Anonymous namespace
void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
@@ -340,6 +355,13 @@ void EmitImageGather(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
ctx.AddU1("{}=true;", *sparse_inst);
}
+ std::string coords_with_subpixel_offset;
+ if (ctx.profile.need_gather_subpixel_offset) {
+ // Apply a subpixel offset of 1/512 the texel size of the texture to ensure same rounding on
+ // AMD hardware as on Maxwell or other Nvidia architectures.
+ coords_with_subpixel_offset = ImageGatherSubpixelOffset(info, texture, coords);
+ coords = coords_with_subpixel_offset;
+ }
if (!sparse_inst || !supports_sparse) {
if (offset.IsEmpty()) {
ctx.Add("{}=textureGather({},{},int({}));", texel, texture, coords,
@@ -387,6 +409,13 @@ void EmitImageGatherDref(EmitContext& ctx, IR::Inst& inst, const IR::Value& inde
LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
ctx.AddU1("{}=true;", *sparse_inst);
}
+ std::string coords_with_subpixel_offset;
+ if (ctx.profile.need_gather_subpixel_offset) {
+ // Apply a subpixel offset of 1/512 the texel size of the texture to ensure same rounding on
+ // AMD hardware as on Maxwell or other Nvidia architectures.
+ coords_with_subpixel_offset = ImageGatherSubpixelOffset(info, texture, coords);
+ coords = coords_with_subpixel_offset;
+ }
if (!sparse_inst || !supports_sparse) {
if (offset.IsEmpty()) {
ctx.Add("{}=textureGather({},{},{});", texel, texture, coords, dref);
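For illustration only (not part of the commit): a standalone C++ sketch that mirrors the Color2D branch of ImageGatherSubpixelOffset above and prints roughly the GLSL a non-sparse gather would be emitted as. The names tex_0, coords and texel_0 are placeholders, not identifiers from the emitter.

    // Sketch: reproduces the fmt::format pattern used by the GLSL backend above.
    #include <fmt/format.h>
    #include <string>

    int main() {
        const std::string texture = "tex_0";
        const std::string coords = "coords";
        // 0.001953125 == 1/512; dividing by textureSize() turns it into 1/512 of a texel.
        const std::string nudged =
            fmt::format("{}+vec2(0.001953125)/vec2(textureSize({}, 0))", coords, texture);
        // A gather of component 0 on the nudged coordinates would then look like:
        fmt::print("{}=textureGather({},{},int({}));\n", "texel_0", texture, nudged, 0);
    }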
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
index 02073c420..968901d42 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
@@ -261,6 +261,39 @@ Id BitTest(EmitContext& ctx, Id mask, Id bit) {
const Id bit_value{ctx.OpBitwiseAnd(ctx.U32[1], shifted, ctx.Const(1u))};
return ctx.OpINotEqual(ctx.U1, bit_value, ctx.u32_zero_value);
}
+
+Id ImageGatherSubpixelOffset(EmitContext& ctx, const IR::TextureInstInfo& info, Id texture,
+ Id coords) {
+ // Apply a subpixel offset of 1/512 the texel size of the texture to ensure same rounding on
+ // AMD hardware as on Maxwell or other Nvidia architectures.
+ const auto calculate_offset{[&](size_t dim) -> std::array<Id, 2> {
+ const Id nudge{ctx.Const(0x1p-9f)};
+ const Id image_size{ctx.OpImageQuerySizeLod(ctx.U32[dim], texture, ctx.u32_zero_value)};
+ const Id offset_x{ctx.OpFDiv(
+ ctx.F32[1], nudge,
+ ctx.OpConvertUToF(ctx.F32[1], ctx.OpCompositeExtract(ctx.U32[1], image_size, 0)))};
+ const Id offset_y{ctx.OpFDiv(
+ ctx.F32[1], nudge,
+ ctx.OpConvertUToF(ctx.F32[1], ctx.OpCompositeExtract(ctx.U32[1], image_size, 1)))};
+ return {ctx.OpFAdd(ctx.F32[1], ctx.OpCompositeExtract(ctx.F32[1], coords, 0), offset_x),
+ ctx.OpFAdd(ctx.F32[1], ctx.OpCompositeExtract(ctx.F32[1], coords, 1), offset_y)};
+ }};
+ switch (info.type) {
+ case TextureType::Color2D:
+ case TextureType::Color2DRect: {
+ const auto offset{calculate_offset(2)};
+ return ctx.OpCompositeConstruct(ctx.F32[2], offset[0], offset[1]);
+ }
+ case TextureType::ColorArray2D:
+ case TextureType::ColorCube: {
+ const auto offset{calculate_offset(3)};
+ return ctx.OpCompositeConstruct(ctx.F32[3], offset[0], offset[1],
+ ctx.OpCompositeExtract(ctx.F32[1], coords, 2));
+ }
+ default:
+ return coords;
+ }
+}
} // Anonymous namespace
Id EmitBindlessImageSampleImplicitLod(EmitContext&) {
@@ -423,6 +456,9 @@ Id EmitImageGather(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id
const IR::Value& offset, const IR::Value& offset2) {
const auto info{inst->Flags<IR::TextureInstInfo>()};
const ImageOperands operands(ctx, offset, offset2);
+ if (ctx.profile.need_gather_subpixel_offset) {
+ coords = ImageGatherSubpixelOffset(ctx, info, TextureImage(ctx, info, index), coords);
+ }
return Emit(&EmitContext::OpImageSparseGather, &EmitContext::OpImageGather, ctx, inst,
ctx.F32[4], Texture(ctx, info, index), coords, ctx.Const(info.gather_component),
operands.MaskOptional(), operands.Span());
@@ -432,6 +468,9 @@ Id EmitImageGatherDref(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
const IR::Value& offset, const IR::Value& offset2, Id dref) {
const auto info{inst->Flags<IR::TextureInstInfo>()};
const ImageOperands operands(ctx, offset, offset2);
+ if (ctx.profile.need_gather_subpixel_offset) {
+ coords = ImageGatherSubpixelOffset(ctx, info, TextureImage(ctx, info, index), coords);
+ }
return Emit(&EmitContext::OpImageSparseDrefGather, &EmitContext::OpImageDrefGather, ctx, inst,
ctx.F32[4], Texture(ctx, info, index), coords, dref, operands.MaskOptional(),
operands.Span());
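A side note on the constant: the SPIR-V backend writes the nudge as the hex-float literal 0x1p-9f, while the GLSL backend writes 0.001953125; both are exactly 1/512. A minimal compile-time check (assumes C++17 hex-float literals; not part of the commit):

    static_assert(0x1p-9f == 0.001953125f, "subpixel nudge is 1/512 of a texel");
    static_assert(0x1p-9f == 1.0f / 512.0f, "2^-9 == 1/512");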
diff --git a/src/shader_recompiler/profile.h b/src/shader_recompiler/profile.h
index 253e0d0bd..31390e869 100644
--- a/src/shader_recompiler/profile.h
+++ b/src/shader_recompiler/profile.h
@@ -52,6 +52,10 @@ struct Profile {
bool need_declared_frag_colors{};
/// Prevents fast math optimizations that may cause inaccuracies
bool need_fastmath_off{};
+ /// Some GPU vendors use a lower fixed point format of 16.8 when calculating pixel coordinates
+ /// in the ImageGather instruction than the Maxwell architecture does. Applying an offset
+ /// fixes this mismatched rounding behaviour.
+ bool need_gather_subpixel_offset{};
/// OpFClamp is broken and OpFMax + OpFMin should be used instead
bool has_broken_spirv_clamp{};
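Relating the flag's comment above to the constant used by both backends: an 8-bit fractional part (the 16.8 fixed-point format mentioned in the comment) gives subtexel steps of 1/256, and the 1/512 nudge is half of one such step. A trivial arithmetic check, for illustration only:

    static_assert(1.0 / 512.0 == (1.0 / 256.0) / 2.0, "nudge is half a 16.8 subtexel step");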
diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp
index 22ed16ebf..d36a0a7a1 100644
--- a/src/video_core/renderer_opengl/gl_device.cpp
+++ b/src/video_core/renderer_opengl/gl_device.cpp
@@ -169,6 +169,7 @@ Device::Device(Core::Frontend::EmuWindow& emu_window) {
has_draw_texture = GLAD_GL_NV_draw_texture;
warp_size_potentially_larger_than_guest = !is_nvidia && !is_intel;
need_fastmath_off = is_nvidia;
+ need_gather_subpixel_offset = is_amd;
can_report_memory = GLAD_GL_NVX_gpu_memory_info;
// At the moment of writing this, only Nvidia's driver optimizes BufferSubData on exclusive
diff --git a/src/video_core/renderer_opengl/gl_device.h b/src/video_core/renderer_opengl/gl_device.h
index 3ff8cad83..e8104c4de 100644
--- a/src/video_core/renderer_opengl/gl_device.h
+++ b/src/video_core/renderer_opengl/gl_device.h
@@ -160,6 +160,10 @@ public:
return need_fastmath_off;
}
+ bool NeedsGatherSubpixelOffset() const {
+ return need_gather_subpixel_offset;
+ }
+
bool HasCbufFtouBug() const {
return has_cbuf_ftou_bug;
}
@@ -225,6 +229,7 @@ private:
bool has_draw_texture{};
bool warp_size_potentially_larger_than_guest{};
bool need_fastmath_off{};
+ bool need_gather_subpixel_offset{};
bool has_cbuf_ftou_bug{};
bool has_bool_ref_bug{};
bool can_report_memory{};
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 479bb8ba3..b40aa6f5e 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -218,6 +218,7 @@ ShaderCache::ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindo
.lower_left_origin_mode = true,
.need_declared_frag_colors = true,
.need_fastmath_off = device.NeedsFastmathOff(),
+ .need_gather_subpixel_offset = device.NeedsGatherSubpixelOffset(),
.has_broken_spirv_clamp = true,
.has_broken_unsigned_image_offsets = true,
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 0684cceed..f51257267 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -329,6 +329,7 @@ PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, const Device& device
.lower_left_origin_mode = false,
.need_declared_frag_colors = false,
+ .need_gather_subpixel_offset = device.NeedsGatherSubpixelOffset(),
.has_broken_spirv_clamp = driver_id == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS,
.has_broken_spirv_position_input = driver_id == VK_DRIVER_ID_QUALCOMM_PROPRIETARY,
diff --git a/src/video_core/vulkan_common/vulkan_device.cpp b/src/video_core/vulkan_common/vulkan_device.cpp
index 6f288b3f8..0939b62c9 100644
--- a/src/video_core/vulkan_common/vulkan_device.cpp
+++ b/src/video_core/vulkan_common/vulkan_device.cpp
@@ -431,6 +431,7 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
"AMD GCN4 and earlier have broken VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT");
has_broken_cube_compatibility = true;
}
+ need_gather_subpixel_offset = true;
}
if (extensions.sampler_filter_minmax && is_amd) {
// Disable ext_sampler_filter_minmax on AMD GCN4 and lower as it is broken.
diff --git a/src/video_core/vulkan_common/vulkan_device.h b/src/video_core/vulkan_common/vulkan_device.h
index 41b5da18a..50e95bcca 100644
--- a/src/video_core/vulkan_common/vulkan_device.h
+++ b/src/video_core/vulkan_common/vulkan_device.h
@@ -554,6 +554,10 @@ public:
return features.robustness2.nullDescriptor;
}
+ bool NeedsGatherSubpixelOffset() const {
+ return need_gather_subpixel_offset;
+ }
+
u32 GetMaxVertexInputAttributes() const {
return properties.properties.limits.maxVertexInputAttributes;
}
@@ -664,6 +668,7 @@ private:
bool must_emulate_bgr565{}; ///< Emulates BGR565 by swizzling RGB565 format.
bool dynamic_state3_blending{}; ///< Has all blending features of dynamic_state3.
bool dynamic_state3_enables{}; ///< Has all enables features of dynamic_state3.
+ bool need_gather_subpixel_offset{}; ///< Needs offset at ImageGather for correct rounding.
u64 device_access_memory{}; ///< Total size of device local memory in bytes.
u32 sets_per_pool{}; ///< Sets per Description Pool